memset(d->arch.pirq_irq, 0,
d->nr_pirqs * sizeof(*d->arch.pirq_irq));
- d->arch.irq_pirq = xmalloc_array(int, nr_irqs);
- if ( !d->arch.irq_pirq )
+ if ( (rc = init_domain_irq_mapping(d)) != 0 )
goto fail;
- memset(d->arch.irq_pirq, 0,
- nr_irqs * sizeof(*d->arch.irq_pirq));
-
- for ( i = 1; platform_legacy_irq(i); ++i )
- if ( !IO_APIC_IRQ(i) )
- d->arch.irq_pirq[i] = d->arch.pirq_irq[i] = i;
if ( is_hvm_domain(d) )
{
d->arch.pirq_emuirq = xmalloc_array(int, d->nr_pirqs);
- d->arch.emuirq_pirq = xmalloc_array(int, nr_irqs);
- if ( !d->arch.pirq_emuirq || !d->arch.emuirq_pirq )
+ if ( !d->arch.pirq_emuirq )
goto fail;
for (i = 0; i < d->nr_pirqs; i++)
d->arch.pirq_emuirq[i] = IRQ_UNBOUND;
- for (i = 0; i < nr_irqs; i++)
- d->arch.emuirq_pirq[i] = IRQ_UNBOUND;
}
 fail:
    d->is_dying = DOMDYING_dead;
vmce_destroy_msr(d);
xfree(d->arch.pirq_irq);
- xfree(d->arch.irq_pirq);
xfree(d->arch.pirq_emuirq);
- xfree(d->arch.emuirq_pirq);
+ cleanup_domain_irq_mapping(d);
free_xenheap_page(d->shared_info);
if ( paging_initialised )
paging_final_teardown(d);
free_xenheap_page(d->shared_info);
xfree(d->arch.pirq_irq);
- xfree(d->arch.irq_pirq);
xfree(d->arch.pirq_emuirq);
- xfree(d->arch.emuirq_pirq);
+ cleanup_domain_irq_mapping(d);
}
unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
return desc;
}
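+/*
+ * Reserve the radix tree slot for irq ahead of time, so that a later
+ * set_domain_irq_pirq() cannot fail.  A pre-existing slot (-EEXIST) is
+ * not an error: the earlier reservation simply gets reused.  The pirq
+ * argument is unused; it merely keeps the signature uniform with the
+ * helpers below.
+ */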
+static int prepare_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+ int err = radix_tree_insert(&d->arch.irq_pirq, irq,
+ radix_tree_int_to_ptr(0));
+ return (err != -EEXIST) ? err : 0;
+}
+
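+/*
+ * Install both directions of the mapping.  The radix tree slot must
+ * already have been reserved via prepare_domain_irq_pirq(), so the
+ * slot replacement below cannot fail.
+ */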
+static void set_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+ radix_tree_replace_slot(
+ radix_tree_lookup_slot(&d->arch.irq_pirq, irq),
+ radix_tree_int_to_ptr(pirq));
+ d->arch.pirq_irq[pirq] = irq;
+}
+
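+/* Undo the mapping, but keep the (now zero) radix tree slot allocated. */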
+static void clear_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+ d->arch.pirq_irq[pirq] = 0;
+ radix_tree_replace_slot(
+ radix_tree_lookup_slot(&d->arch.irq_pirq, irq),
+ radix_tree_int_to_ptr(0));
+}
+
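+/* Release the slot reserved by prepare_domain_irq_pirq(). */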
+static void cleanup_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+ radix_tree_delete(&d->arch.irq_pirq, irq);
+}
+
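+/*
+ * Initialise the trees and re-establish the identity mappings for
+ * legacy (non-IO-APIC) IRQs that the removed array setup code used to
+ * provide.
+ */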
+int init_domain_irq_mapping(struct domain *d)
+{
+ unsigned int i;
+ int err = 0;
+
+ radix_tree_init(&d->arch.irq_pirq);
+ if ( is_hvm_domain(d) )
+ radix_tree_init(&d->arch.hvm_domain.emuirq_pirq);
+
+ for ( i = 1; platform_legacy_irq(i); ++i )
+ {
+ if ( IO_APIC_IRQ(i) )
+ continue;
+ err = prepare_domain_irq_pirq(d, i, i);
+ if ( err )
+ break;
+ set_domain_irq_pirq(d, i, i);
+ }
+
+ if ( err )
+ cleanup_domain_irq_mapping(d);
+ return err;
+}
+
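+/*
+ * The NULL slot-free callbacks are sufficient here: the slots only
+ * hold int-encoded values, so there is nothing extra to release.
+ */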
+void cleanup_domain_irq_mapping(struct domain *d)
+{
+ radix_tree_destroy(&d->arch.irq_pirq, NULL);
+ if ( is_hvm_domain(d) )
+ radix_tree_destroy(&d->arch.hvm_domain.emuirq_pirq, NULL);
+}
+
/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void)
{
{
irq_guest_action_t *oldaction = NULL;
struct irq_desc *desc;
- int irq;
+ int irq = 0;
WARN_ON(!spin_is_locked(&d->event_lock));

    BUG_ON(!local_irq_is_enabled());
    desc = domain_spin_lock_irq_desc(d, pirq, NULL);

    if ( desc == NULL )
    {
        irq = -domain_pirq_to_irq(d, pirq);
        BUG_ON(irq <= 0);
desc = irq_to_desc(irq);
spin_lock_irq(&desc->lock);
- d->arch.pirq_irq[pirq] = d->arch.irq_pirq[irq] = 0;
+ clear_domain_irq_pirq(d, irq, pirq);
}
    else
        oldaction = __pirq_guest_unbind(d, pirq, desc);

    spin_unlock_irq(&desc->lock);

    if ( oldaction != NULL )
    {
kill_timer(&oldaction->eoi_timer);
xfree(oldaction);
}
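+    /*
+     * irq is non-zero only on the desc == NULL (forcibly unbound) path
+     * above; release the radix tree slot in that case.
+     */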
+ else if ( irq > 0 )
+ cleanup_domain_irq_pirq(d, irq, pirq);
}
static int pirq_guest_force_unbind(struct domain *d, int irq)
return ret;
}
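+    /*
+     * Reserve the radix tree slot before taking the descriptor lock, so
+     * that set_domain_irq_pirq() below cannot fail with the lock held.
+     */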
+ ret = prepare_domain_irq_pirq(d, irq, pirq);
+ if ( ret )
+ return ret;
+
desc = irq_to_desc(irq);
if ( type == MAP_PIRQ_TYPE_MSI )
dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
d->domain_id, irq);
desc->handler = &pci_msi_type;
- d->arch.pirq_irq[pirq] = irq;
- d->arch.irq_pirq[irq] = pirq;
+ set_domain_irq_pirq(d, irq, pirq);
setup_msi_irq(pdev, msi_desc, irq);
spin_unlock_irqrestore(&desc->lock, flags);
- } else
+ }
+ else
{
spin_lock_irqsave(&desc->lock, flags);
- d->arch.pirq_irq[pirq] = irq;
- d->arch.irq_pirq[irq] = pirq;
+ set_domain_irq_pirq(d, irq, pirq);
spin_unlock_irqrestore(&desc->lock, flags);
}
done:
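+    /* On failure, release the slot reserved above again. */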
+ if ( ret )
+ cleanup_domain_irq_pirq(d, irq, pirq);
return ret;
}
BUG_ON(irq != domain_pirq_to_irq(d, pirq));
if ( !forced_unbind )
- {
- d->arch.pirq_irq[pirq] = 0;
- d->arch.irq_pirq[irq] = 0;
- }
+ clear_domain_irq_pirq(d, irq, pirq);
else
{
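+        /*
+         * Flag the forced unbind by negating both directions of the
+         * mapping; the radix tree slot stays allocated.
+         */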
d->arch.pirq_irq[pirq] = -irq;
- d->arch.irq_pirq[irq] = -pirq;
+ radix_tree_replace_slot(
+ radix_tree_lookup_slot(&d->arch.irq_pirq, irq),
+ radix_tree_int_to_ptr(-pirq));
}
spin_unlock_irqrestore(&desc->lock, flags);
if (msi_desc)
msi_free_irq(msi_desc);
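+    /*
+     * A regular unmap releases the radix tree slot; a forced unbind
+     * keeps it, holding the negated pirq stored above.
+     */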
+ if ( !forced_unbind )
+ cleanup_domain_irq_pirq(d, irq, pirq);
+
ret = irq_deny_access(d, pirq);
if ( ret )
dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
return 0;
}
- d->arch.pirq_emuirq[pirq] = emuirq;
/* do not store emuirq mappings for pt devices */
if ( emuirq != IRQ_PT )
- d->arch.emuirq_pirq[emuirq] = pirq;
+ {
+ int err = radix_tree_insert(&d->arch.hvm_domain.emuirq_pirq, emuirq,
+ radix_tree_int_to_ptr(pirq));
+
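+        /* -EEXIST simply means replacing a stale mapping. */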
+ switch ( err )
+ {
+ case 0:
+ break;
+ case -EEXIST:
+ radix_tree_replace_slot(
+ radix_tree_lookup_slot(
+ &d->arch.hvm_domain.emuirq_pirq, emuirq),
+ radix_tree_int_to_ptr(pirq));
+ break;
+ default:
+ return err;
+ }
+ }
+ d->arch.pirq_emuirq[pirq] = emuirq;
return 0;
}
d->arch.pirq_emuirq[pirq] = IRQ_UNBOUND;
if ( emuirq != IRQ_PT )
- d->arch.emuirq_pirq[emuirq] = IRQ_UNBOUND;
+ radix_tree_delete(&d->arch.hvm_domain.emuirq_pirq, emuirq);
done:
return ret;